import numpy as np
import tensorflow.compat.v2 as tf
tf.enable_v2_behavior()
import pandas as pd
from tensorflow import keras
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import RobustScaler
from sklearn.preprocessing import MinMaxScaler
from matplotlib import pyplot
import plotly.graph_objects as go
import math
import seaborn as sns
from sklearn.metrics import mean_squared_error
np.random.seed(1)
tf.random.set_seed(1)
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM, GRU, Dropout, RepeatVector, TimeDistributed
from keras import backend
# --- Experiment configuration (hyperparameters and paths) ---
MODELFILENAME = 'MODELS/GRU_1h_TFM_2c'  # where the trained model is saved ('.h5' appended later)
TIME_STEPS=6 #1h: sequence-window length in samples
CMODEL = GRU  # recurrent layer class used for both stacked layers (GRU here; could be LSTM)
MODEL = "GRU"  # human-readable label used in printed evaluation reports
UNITS=45  # units per recurrent layer
DROPOUT1=0.118  # dropout rate after the first recurrent layer
DROPOUT2=0.243  # dropout rate after the second recurrent layer
ACTIVATION='tanh'  # activation of the TimeDistributed Dense output head
OPTIMIZER='adam'
EPOCHS=43
BATCHSIZE=30
VALIDATIONSPLIT=0.2  # fraction of training windows held out for validation
# Code to read csv file into Colaboratory:
# from google.colab import files
# uploaded = files.upload()
# import io
# df = pd.read_csv(io.BytesIO(uploaded['SentDATA.csv']))
# Dataset is now stored in a Pandas Dataframe
# Load the sensor dataset; the 'Time' column becomes the datetime index.
df = pd.read_csv('../../data/dadesTFM.csv')
# NOTE(review): reset_index() keeps the old RangeIndex as an 'index' column,
# which is why the frames below carry 7 columns — confirm this is intended.
df.reset_index(inplace=True)
df['Time'] = pd.to_datetime(df['Time'])
df = df.set_index('Time')

# Short column names used throughout the rest of the script.
columns = ['PM1','PM25','PM10','PM1ATM','PM25ATM','PM10ATM']

# Map the original (spaced) column names to the short names and cast each
# measurement column to float32.  (The original copied, renamed, and then
# re-assigned every column from df; a rename + astype loop is equivalent.)
df1 = df.rename(columns={"PM 1": "PM1", "PM 2.5": "PM25", "PM 10": "PM10",
                         "PM 1 ATM": "PM1ATM", "PM 2.5 ATM": "PM25ATM",
                         "PM 10 ATM": "PM10ATM"})
for _col in columns:
    df1[_col] = df1[_col].astype(np.float32)

# Chronological 80/20 train/test split (no shuffling: this is a time series).
df2 = df1.copy()
train_size = int(len(df2) * 0.8)
test_size = len(df2) - train_size
train, test = df2.iloc[0:train_size], df2.iloc[train_size:len(df2)]
# Shapes observed in the original run: train (3117, 7), test (780, 7)
# Standardize each feature column of the training set.
# `.loc` (with a flattened result) avoids the SettingWithCopyWarning the
# original run produced, since `train` is a slice of df2.
# NOTE(review): a new scaler is fitted per column and then discarded — only
# the last one stays bound to `scaler`.  If the test set should be scaled
# with the *training* statistics, the fitted scalers must be kept.
for col in columns:
    scaler = StandardScaler()
    train.loc[:, col] = scaler.fit_transform(train[[col]]).ravel()
def create_sequences(X, y, time_steps=TIME_STEPS):
    """Build overlapping windows from X with the next value of y as target.

    Returns (windows, targets) as numpy arrays: windows has shape
    (len(X) - time_steps, time_steps, n_features), targets the y value
    immediately following each window.
    """
    n_windows = len(X) - time_steps
    windows = [X.iloc[start:start + time_steps].values for start in range(n_windows)]
    targets = [y.iloc[start + time_steps] for start in range(n_windows)]
    return np.array(windows), np.array(targets)
# Build training windows from the standardized PM2.5 column only
# (columns[1] == 'PM25'): the model is univariate.
X_train, y_train = create_sequences(train[[columns[1]]], train[columns[1]])
#X_test, y_test = create_sequences(test[[columns[1]]], test[columns[1]])
print(f'X_train shape: {X_train.shape}')
print(f'y_train shape: {y_train.shape}')
# Observed in the original run: X_train (3111, 6, 1), y_train (3111,)
# Extra metric: root mean squared error, built from Keras backend ops so it
# can be passed to model.compile(metrics=...).
def rmse(y_true, y_pred):
    """RMSE over the last axis, as a Keras-compatible metric."""
    squared_error = backend.square(y_pred - y_true)
    return backend.sqrt(backend.mean(squared_error, axis=-1))
# Two stacked recurrent (GRU) layers with dropout, followed by a
# TimeDistributed Dense(1) head, so the network emits one value per time
# step (output shape (None, TIME_STEPS, 1)).
# NOTE(review): y_train has shape (N,) while the model predicts (N, 6, 1);
# Keras broadcasts the scalar target against every step — confirm this is
# intended rather than a single-step regression head.
model = Sequential()
model.add(CMODEL(units=UNITS, return_sequences=True,
                 input_shape=(X_train.shape[1], X_train.shape[2])))
model.add(Dropout(rate=DROPOUT1))
model.add(CMODEL(units=UNITS, return_sequences=True))
model.add(Dropout(rate=DROPOUT2))
model.add(TimeDistributed(Dense(1, kernel_initializer='normal',
                                activation=ACTIVATION)))
model.compile(optimizer=OPTIMIZER, loss='mae', metrics=['mse', rmse])
model.summary()
# Original run: 18,946 trainable params (gru 6,480 + gru_1 12,420 + dense 46)
# Train with early stopping on validation loss (patience 5).  shuffle=False
# keeps the windows in chronological order; validation_split takes the last
# 20% of the (unshuffled) training windows.
history = model.fit(
    X_train, y_train,
    epochs=EPOCHS,
    batch_size=BATCHSIZE,
    validation_split=VALIDATIONSPLIT,
    callbacks=[keras.callbacks.EarlyStopping(monitor='val_loss',
                                             patience=5, mode='min')],
    shuffle=False,
)
# Original run trained all 43 epochs (early stopping never triggered),
# ending at loss 0.3348 / val_loss 0.2059 (MAE).
import matplotlib.pyplot as plt

# Training vs. validation curves for every tracked metric.
for metric_key, curve_label in [('loss', 'MAE Training loss'),
                                ('val_loss', 'MAE Validation loss'),
                                ('mse', 'MSE Training loss'),
                                ('val_mse', 'MSE Validation loss'),
                                ('rmse', 'RMSE Training loss'),
                                ('val_rmse', 'RMSE Validation loss')]:
    plt.plot(history.history[metric_key], label=curve_label)
plt.legend();

# Distribution of the per-window MAE on the training data.
X_train_pred = model.predict(X_train, verbose=0)
train_mae_loss = np.mean(np.abs(X_train_pred - X_train), axis=1)
plt.hist(train_mae_loss, bins=50)
plt.xlabel('Train MAE loss')
plt.ylabel('Number of Samples');
def evaluate_prediction(predictions, actual, model_name):
    """Print and return (MAE, RMSE, MSE) of `predictions` vs. `actual`."""
    residuals = predictions - actual
    mse = np.mean(np.square(residuals))
    rmse = np.sqrt(mse)
    mae = np.mean(np.abs(residuals))
    print(model_name + ':')
    print('Mean Absolute Error: {:.4f}'.format(mae))
    print('Root Mean Square Error: {:.4f}'.format(rmse))
    print('Mean Square Error: {:.4f}'.format(mse))
    print('')
    return mae, rmse, mse
# Training-set error summary.
# NOTE: the original assigned the results to `mae, rmse, mse`, silently
# shadowing the `rmse` metric *function* defined above with a float; distinct
# names avoid that (needed e.g. when reloading the saved model with
# custom_objects={'rmse': rmse}).
train_mae, train_rmse, train_mse = evaluate_prediction(X_train_pred, X_train, MODEL)
# Original run: MAE 0.1926, RMSE 0.4313, MSE 0.1860

model.save(MODELFILENAME + '.h5')
# Threshold computation for the test set.
def calculate_threshold(X_test, X_test_pred):
    """Return the 90th-percentile per-row RMSE between predictions and data.

    The per-row RMSE distances are sorted in place and the value at the
    0.9 cutoff index is used as the anomaly threshold.
    (The original docstring said 0.80, contradicting the code; the code's
    0.9 is the behavior that is kept and documented here.)
    """
    distance = np.sqrt(np.mean(np.square(X_test_pred - X_test), axis=1))
    distance.sort()
    cut_off = int(0.9 * len(distance))
    threshold = distance[cut_off]
    return threshold
# Per-column evaluation: standardize, build windows, evaluate, predict, and
# flag as anomalies the windows whose RMSE exceeds the 90th-percentile
# threshold.
# NOTE(review): the scaler is re-fitted on the test data itself rather than
# reusing the training statistics — confirm this is intended.
for col in columns:
    print("####################### " + col + " ###########################")
    # Standardize the test data (.loc with a flattened result avoids the
    # SettingWithCopyWarning the original run produced).
    scaler = StandardScaler()
    test.loc[:, col] = scaler.fit_transform(test[[col]]).ravel()
    # Build windowed sequences for this column.
    X_test1, y_test1 = create_sequences(test[[col]], test[col])
    print(f'Testing shape: {X_test1.shape}')
    # Evaluate the model (loss, mse, rmse).  Renamed from `eval`, which
    # shadowed the builtin.
    eval_results = model.evaluate(X_test1, y_test1)
    print("evaluate: ", eval_results)
    # Predict on the test windows.
    X_test1_pred = model.predict(X_test1, verbose=0)
    evaluate_prediction(X_test1_pred, X_test1, MODEL)
    # Per-window MAE / RMSE losses.
    test1_mae_loss = np.mean(np.abs(X_test1_pred - X_test1), axis=1)
    test1_rmse_loss = np.sqrt(np.mean(np.square(X_test1_pred - X_test1), axis=1))
    # Flatten (windows, steps, features) -> (windows*steps, features) for the
    # threshold computation.
    X_test1_predReshape = X_test1_pred.reshape(
        (X_test1_pred.shape[0] * X_test1_pred.shape[1]), X_test1_pred.shape[2])
    X_test1Reshape = X_test1.reshape(
        (X_test1.shape[0] * X_test1.shape[1]), X_test1.shape[2])
    threshold_test = calculate_threshold(X_test1Reshape, X_test1_predReshape)
    # Score table: per-window loss vs. the fixed threshold.
    test1_score_df = pd.DataFrame(test[TIME_STEPS:])
    test1_score_df['loss'] = test1_rmse_loss.reshape((-1))
    test1_score_df['threshold'] = threshold_test
    test1_score_df['anomaly'] = test1_score_df['loss'] > test1_score_df['threshold']
    test1_score_df[col] = test[TIME_STEPS:][col]
    # Plot test loss vs. threshold.
    fig = go.Figure()
    fig.add_trace(go.Scatter(x=test1_score_df.index, y=test1_score_df['loss'],
                             name='Test loss'))
    fig.add_trace(go.Scatter(x=test1_score_df.index, y=test1_score_df['threshold'],
                             name='Threshold'))
    fig.update_layout(showlegend=True, title='Test loss vs. Threshold')
    fig.show()
    # Collect the anomalous rows.
    anomalies1 = test1_score_df.loc[test1_score_df['anomaly'] == True]
    print('anomalies: ', anomalies1.shape)
    print()
    # Plot the back-transformed series plus the detected anomalies to check
    # that the normalization does not distort the data.
    # NOTE(review): inverse_transform is called on a 1-D Series here; sklearn
    # >= 1.0 requires 2-D input — may need .values.reshape(-1, 1).  Kept as
    # in the original, which ran on an older sklearn.
    fig = go.Figure()
    fig.add_trace(go.Scatter(x=test1_score_df.index,
                             y=scaler.inverse_transform(test1_score_df[col]),
                             name=col))
    fig.add_trace(go.Scatter(x=anomalies1.index,
                             y=scaler.inverse_transform(anomalies1[col]),
                             mode='markers', name='Anomaly'))
    fig.update_layout(showlegend=True, title='Detected anomalies')
    fig.show()
    print("######################################################")
# --- Notebook output from the original run (kept for reference) -----------
# Every column triggered a SettingWithCopyWarning at
#   test[col] = scaler.fit_transform(test[[col]])
# ("Try using .loc[row_indexer, col_indexer] = value instead").
# Per-column results (evaluate = [loss, mse, rmse]):
# PM1:     Testing shape (774, 6, 1); evaluate [0.3903, 0.6120, 0.4242]; MAE 0.2019, RMSE 0.5885, MSE 0.3464; anomalies (119, 10)
# PM25:    Testing shape (774, 6, 1); evaluate [0.4011, 0.5480, 0.4363]; MAE 0.2078, RMSE 0.5459, MSE 0.2980; anomalies (95, 10)
# PM10:    Testing shape (774, 6, 1); evaluate [0.4099, 0.5184, 0.4459]; MAE 0.2123, RMSE 0.5087, MSE 0.2588; anomalies (92, 10)
# PM1ATM:  Testing shape (774, 6, 1); evaluate [0.4111, 0.5502, 0.4480]; MAE 0.2070, RMSE 0.5026, MSE 0.2526; anomalies (93, 10)
# PM25ATM: Testing shape (774, 6, 1); evaluate [0.4080, 0.5586, 0.4447]; MAE 0.2058, RMSE 0.5146, MSE 0.2648; anomalies (93, 10)
# PM10ATM: Testing shape (774, 6, 1); evaluate [0.4083, 0.5338, 0.4441]; MAE 0.2112, RMSE 0.5199, MSE 0.2703; anomalies (92, 10)